Android Media in Practice 1: Recording Audio and Video (Part 1)

Posted by alonealice on 2020-11-01

Camera Preview

Create a CameraGLSurfaceView that extends GLSurfaceView, and initialize the OpenGL environment in its constructors.

public CameraGLSurfaceView(Context context) {
    super(context);
    init(context);
}

public CameraGLSurfaceView(Context context, AttributeSet attrs) {
    super(context, attrs);
    init(context);
}

// Initialize the OpenGL environment
public void init(Context context) {
    this.context = context;
    surfaceRenderer = new SurfaceRenderer(this);
    // GLES 2.0, API >= 8
    setEGLContextClientVersion(2);
    setRenderer(surfaceRenderer);
    // RENDERMODE_WHEN_DIRTY renders only on request and saves power
    //setRenderMode(GLSurfaceView.RENDERMODE_WHEN_DIRTY);
}

SurfaceRenderer implements the GLSurfaceView.Renderer and SurfaceTexture.OnFrameAvailableListener interfaces.

Camera initialization code:

public void startPreview(int width, int height) {
    initCamera(width, height);
    if (camera == null) {
        return;
    }
    Camera.Size previewSize = camera.getParameters().getPreviewSize();
    setVideoSize(previewSize.width, previewSize.height);
    SurfaceTexture surfaceTexture = surfaceRenderer.getSurfaceTexture();
    try {
        surfaceTexture.setDefaultBufferSize(previewSize.width, previewSize.height);
        // Bind the camera to the OpenGL texture
        camera.setPreviewTexture(surfaceTexture);
    } catch (IOException e) {
        e.printStackTrace();
    }
    camera.startPreview();
}

private void initCamera(int width, int height) {
    if (camera == null) {
        camera = android.hardware.Camera.open(0);
        Camera.Parameters parameters = camera.getParameters();
        // Continuous autofocus tuned for video
        parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);

        // Pick a supported preview FPS range
        List<int[]> supportedPreviewFpsRange = parameters.getSupportedPreviewFpsRange();
        parameters.setPreviewFpsRange(supportedPreviewFpsRange.get(0)[0], supportedPreviewFpsRange.get(0)[1]);

        parameters.setRecordingHint(true);

        Camera.Size closestSupportedSize = getClosestSupportedSize(parameters.getSupportedPreviewSizes(), width, height);
        parameters.setPreviewSize(closestSupportedSize.width, closestSupportedSize.height);

        Camera.Size closestPictureSize = getClosestSupportedSize(parameters.getSupportedPictureSizes(), width, height);
        parameters.setPictureSize(closestPictureSize.width, closestPictureSize.height);
        setRotation(parameters);
        camera.setParameters(parameters);
    }
}

private final void setRotation(final Camera.Parameters params) {
    final Display display = ((WindowManager) context.getSystemService(Context.WINDOW_SERVICE)).getDefaultDisplay();
    final int rotation = display.getRotation();
    int degrees = 0;
    switch (rotation) {
        case Surface.ROTATION_0:
            degrees = 0;
            break;
        case Surface.ROTATION_90:
            degrees = 90;
            break;
        case Surface.ROTATION_180:
            degrees = 180;
            break;
        case Surface.ROTATION_270:
            degrees = 270;
            break;
    }
    final Camera.CameraInfo info = new android.hardware.Camera.CameraInfo();
    android.hardware.Camera.getCameraInfo(0, info);
    isFront = (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT);
    if (isFront) { // front camera
        degrees = (info.orientation + degrees) % 360;
        degrees = (360 - degrees) % 360; // compensate for the mirrored image
    } else { // back camera
        degrees = (info.orientation - degrees + 360) % 360;
    }
    camera.setDisplayOrientation(degrees);
    mRotation = degrees;
}

// Find the supported camera size closest to the requested size
private Camera.Size getClosestSupportedSize(List<Camera.Size> supportedSizes, final int requestedWidth, final int requestedHeight) {
    return Collections.min(supportedSizes, new Comparator<Camera.Size>() {

        private int diff(final Camera.Size size) {
            return Math.abs(requestedWidth - size.width) + Math.abs(requestedHeight - size.height);
        }

        @Override
        public int compare(final Camera.Size lhs, final Camera.Size rhs) {
            return diff(lhs) - diff(rhs);
        }
    });
}

// Record the video size, swapping width and height for portrait rotations
public void setVideoSize(final int width, final int height) {
    if ((mRotation % 180) == 0) {
        mVideoWidth = width;
        mVideoHeight = height;
    } else {
        mVideoWidth = height;
        mVideoHeight = width;
    }
    // Adjust the OpenGL viewport
    queueEvent(() -> surfaceRenderer.updateViewport());
}

These are all routine camera parameter settings, so I won't go into detail.

One important point: once the camera parameters are configured, we fetch the SurfaceTexture from the SurfaceRenderer and bind the camera to it. Now let's look at the implementation inside SurfaceRenderer.

SurfaceRenderer implements GLSurfaceView.Renderer and SurfaceTexture.OnFrameAvailableListener, and overrides a few important methods:

public void onSurfaceCreated(GL10 gl, EGLConfig config) {
    // Camera rendering requires the OES_EGL_image_external extension
    final String extensions = GLES20.glGetString(GLES20.GL_EXTENSIONS); // API >= 8
    if (!extensions.contains("OES_EGL_image_external"))
        throw new RuntimeException("This system does not support OES_EGL_image_external.");

    textureId = GLDrawer2D.initTextureId();
    surfaceTexture = new SurfaceTexture(textureId);
    surfaceTexture.setOnFrameAvailableListener(this);

    GLES20.glClearColor(1.0f, 1.0f, 0.0f, 1.0f);
    mDrawer = new GLDrawer2D();
    mDrawer.setMatrix(mMvpMatrix, 0);
}

onSurfaceCreated checks the GL environment, creates the texture ID, builds a SurfaceTexture from that texture ID, and creates the GLDrawer2D used to draw the preview.

public void onSurfaceChanged(GL10 gl, int width, int height) {
    if (width == 0 || height == 0) return;
    updateViewport();
    cameraGLSurfaceView.startPreview(width, height);
}

onSurfaceChanged updates the viewport and tells cameraGLSurfaceView to start the preview.

public void onDrawFrame(GL10 gl) {
    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
    if (mIsNeedUpdateTexture) {
        mIsNeedUpdateTexture = false;
        // Pull the latest camera frame into the texture
        // (the camera is already bound to this SurfaceTexture)
        surfaceTexture.updateTexImage();
        // Fetch the texture transform matrix
        surfaceTexture.getTransformMatrix(mSurfaceTextureMatrix);
    }
    ...
    if (mDrawer != null) {
        mDrawer.draw(textureId, mSurfaceTextureMatrix);
    }
    ...
}

Once the preview starts, onDrawFrame is called repeatedly; here we update the texture and let mDrawer draw the frame. The mIsNeedUpdateTexture flag is set by the OnFrameAvailableListener callback, sketched below.
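
The post doesn't show the listener callback itself; a minimal sketch of what it presumably does (an assumption: onFrameAvailable fires on an arbitrary thread, so it should only flip the flag and schedule a redraw):

@Override
public void onFrameAvailable(SurfaceTexture surfaceTexture) {
    // Called by SurfaceTexture whenever the camera delivers a new frame
    mIsNeedUpdateTexture = true;
    // Harmless in continuous mode; required if RENDERMODE_WHEN_DIRTY is used
    cameraGLSurfaceView.requestRender();
}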

/**
 * Update the viewport, i.e. the region of the OpenGL canvas used to display content.
 */
public void updateViewport() {
    int width = cameraGLSurfaceView.getWidth();
    int height = cameraGLSurfaceView.getHeight();
    GLES20.glViewport(0, 0, width, height);
    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);

    int videoWidth = cameraGLSurfaceView.getVideoWidth();
    int videoHeight = cameraGLSurfaceView.getVideoHeight();
    if (videoWidth == 0 || videoHeight == 0) {
        return;
    }
    Matrix.setIdentityM(mMvpMatrix, 0);
    if (mDrawer != null)
        mDrawer.setMatrix(mMvpMatrix, 0);
}

The overall preview flow:

1. CameraGLSurfaceView initializes the OpenGL environment and creates a SurfaceRenderer bound to the GLSurfaceView;

2. SurfaceRenderer's onSurfaceCreated callback creates the textureId, uses it to create the SurfaceTexture, and creates the GLDrawer2D (the texture setup is sketched after this list);

3. SurfaceRenderer's onSurfaceChanged callback carves a region out of the OpenGL canvas for display and asks CameraGLSurfaceView to start the preview;

4. CameraGLSurfaceView initializes the camera parameters, fetches the SurfaceTexture from the SurfaceRenderer, binds it to the camera, and starts the preview.
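
One detail worth spelling out: camera frames can only be sampled through an external OES texture, not a regular GL_TEXTURE_2D. The post never shows GLDrawer2D.initTextureId(), but a typical implementation (a sketch under that assumption) is:

public static int initTextureId() {
    final int[] tex = new int[1];
    GLES20.glGenTextures(1, tex, 0);
    // Camera frames must be bound as GL_TEXTURE_EXTERNAL_OES
    GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, tex[0]);
    GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
            GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
    GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
            GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
    GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
            GLES20.GL_TEXTURE_WRAP_S, GLES20.GL_CLAMP_TO_EDGE);
    GLES20.glTexParameteri(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
            GLES20.GL_TEXTURE_WRAP_T, GLES20.GL_CLAMP_TO_EDGE);
    return tex[0];
}

The fragment shader that samples it then needs "#extension GL_OES_EGL_image_external : require" and a samplerExternalOES uniform, which is why onSurfaceCreated checks for the extension.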

Video Encoder

Create a MediaEncoder that implements Callable:

public MediaEncoder(MMuxer mMuxer, MediaType type) {
    this.mMuxer = mMuxer;
    mMediaType = type;
}

public enum MediaType {
    VIDEO,
    AUDIO
}

MMuxer is responsible for muxing the audio and video streams into one file.
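
MMuxer itself is never shown in the post. A minimal sketch of the wrapper around MediaMuxer that the encoder code below relies on (the method names follow the post; the bodies are assumptions):

// A minimal sketch of MMuxer (assumption: the post does not show it).
// It wraps MediaMuxer and only starts once both tracks are added.
public class MMuxer {
    private final MediaMuxer mMuxer;
    private boolean mVideoTrackAdd, mAudioTrackAdd, mStarted;

    public MMuxer(String outputPath) throws IOException {
        mMuxer = new MediaMuxer(outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
    }

    public synchronized int addTrack(MediaFormat format, MediaEncoder.MediaType type) {
        int track = mMuxer.addTrack(format);
        if (type == MediaEncoder.MediaType.VIDEO) mVideoTrackAdd = true;
        else mAudioTrackAdd = true;
        return track;
    }

    public synchronized boolean isVideoTrackAdd() { return mVideoTrackAdd; }
    public synchronized boolean isAudioTrackAdd() { return mAudioTrackAdd; }
    // Prepared once both tracks exist, so neither encoder writes too early
    public synchronized boolean isPrepared() { return mVideoTrackAdd && mAudioTrackAdd; }
    public synchronized boolean isStarted() { return mStarted; }

    public synchronized void start() {
        mMuxer.start();
        mStarted = true;
    }

    public synchronized void writeSampleData(int trackIndex, ByteBuffer buffer, MediaCodec.BufferInfo info) {
        if (mStarted) mMuxer.writeSampleData(trackIndex, buffer, info);
    }

    public synchronized void stop() {
        if (mStarted) {
            mMuxer.stop();
            mMuxer.release();
            mStarted = false;
        }
    }
}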

public void init() {
    mBufferInfo = new MediaCodec.BufferInfo();
    prepare();
    mIsInit = true;
    setRecording(true);
}

The core method is output:

public void output(boolean isEos) {
    ByteBuffer[] outputBuffers;
    int count = 0;
    // Dequeue the first available output buffer
    int outputIndex = mMediaCodec.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
    outputBuffers = mMediaCodec.getOutputBuffers();
    do {
        if (outputIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
            // No output ready yet; retry a few times unless draining at EOS
            if (!isEos) {
                count++;
                if (count >= 5) {
                    break;
                }
            }
        } else if (outputIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
            // The output buffers changed; refresh our reference
            outputBuffers = mMediaCodec.getOutputBuffers();
        } else if (outputIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
            // Add the video/audio track to the muxer
            addTrack();
        } else {
            ByteBuffer outputBuffer = outputBuffers[outputIndex];
            if (outputBuffer == null) {
                Log.e(TAG, "output buffer null");
                return;
            }
            if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                // The codec config data was pulled out and fed to the muxer when we got
                // the INFO_OUTPUT_FORMAT_CHANGED status. Ignore it.
                mBufferInfo.size = 0;
            }
            if (mBufferInfo.size != 0) {
                // Make sure the video track has been added
                if (!mMuxer.isVideoTrackAdd()) {
                    addTrack();
                }
                // Start the muxer once all tracks are ready and it isn't running yet
                if (!mMuxer.isStarted() && mMuxer.isPrepared()) {
                    mMuxer.start();
                }

                if (mMuxer.isStarted()) {
                    outputBuffer.position(mBufferInfo.offset);
                    outputBuffer.limit(mBufferInfo.offset + mBufferInfo.size);
                    // Write the sample into its track
                    mMuxer.writeSampleData(mTrackIndex, outputBuffer, mBufferInfo);
                }
            }
            mMediaCodec.releaseOutputBuffer(outputIndex, false);
            if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                // EOS reached; stop the encoder
                Log.d(TAG, "output: eos coming");
                mIsRecording = false;
                release();
                break; // out of the loop
            }
        }
        // Dequeue the next output buffer
        outputIndex = mMediaCodec.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
    } while (outputIndex >= 0);
}

Using the MediaCodec and its BufferInfo, we dequeue the current output index (outputIndex) and fetch the output buffer array (outputBuffers). Depending on outputIndex we either add the corresponding track or refresh the buffer array; once the muxer has all its tracks we start it, position the output buffer at the encoded sample, write the sample into the muxer, release the buffer back to MediaCodec, then dequeue the next frame and loop.
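
Note that getOutputBuffers() is deprecated; on API 21+ the same loop can look each buffer up by index instead (a sketch, not what the post's code targets):

// API 21+ alternative to the deprecated getOutputBuffers() array:
ByteBuffer outputBuffer = mMediaCodec.getOutputBuffer(outputIndex);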

Other helper methods:

protected MediaCodecInfo selectCodec(String mimeType) {
    int numCodecs = MediaCodecList.getCodecCount();
    for (int i = 0; i < numCodecs; i++) {
        MediaCodecInfo codecInfo = MediaCodecList.getCodecInfoAt(i);
        if (!codecInfo.isEncoder()) {
            continue;
        }
        String[] types = codecInfo.getSupportedTypes();
        for (int j = 0; j < types.length; j++) {
            if (types[j].equalsIgnoreCase(mimeType)) {
                return codecInfo;
            }
        }
    }
    return null;
}

Adding a track:

protected void addTrack() {
    if (mMediaType == AUDIO && mMuxer.isAudioTrackAdd()) {
        return;
    }
    if (mMediaType == VIDEO && mMuxer.isVideoTrackAdd()) {
        return;
    }
    mMediaFormat = mMediaCodec.getOutputFormat();
    mTrackIndex = mMuxer.addTrack(mMediaFormat, mMediaType);
}

Next, create a VideoEncoder that extends MediaEncoder. It mainly sets up the thread lock and condition, and it can also configure the encoder so that every frame is a key frame.

public VideoEncoder(MMuxer mMuxer) {
    super(mMuxer, MediaType.VIDEO);
}

@Override
public void init() {
    mLock = new ReentrantLock();
    mOutputCondition = mLock.newCondition();
    super.init();
}

// Request a key frame from the encoder
protected void requestKeyFrame() {
    if (isRecording()) {
        try {
            Bundle reqKeyCmd = new Bundle();
            reqKeyCmd.putInt(MediaCodec.PARAMETER_KEY_REQUEST_SYNC_FRAME, 0);
            mMediaCodec.setParameters(reqKeyCmd);
        } catch (Exception e) {}
    }
}

Create a VideoSurfaceEncoder that extends VideoEncoder. It mostly implements the methods its parent leaves unimplemented, and creates a SurfaceEncoderRenderer that hosts the OpenGL environment used for encoding.

private SurfaceEncoderRenderer mRenderer;

private Surface mSurface;

public VideoSurfaceEncoder(MMuxer mMuxer, int width, int height) {
    super(mMuxer);
    mWidth = width;
    mHeight = height;
    mRenderer = new SurfaceEncoderRenderer();
}

setEglAndStart is called from outside to start encoding. ThreadPool maintains a thread pool and invokes the encoder's call method on a worker thread (a minimal ThreadPool sketch follows the snippet). The EGLContext and textureId created for the camera preview are handed to the VideoSurfaceEncoder, linking the two GL environments.

public void setEglAndStart(EGLContext eglContext, int textureId) {
    mRenderer.setEglContext(eglContext, textureId, this);
    ThreadPool.getInstance().run(this);
}

The call method performs initialization and then loops, calling output to drain encoded data:

public Object call() throws Exception {
    while (!mIsInit) {
        init();
    }
    while (mIsRecording) {
        mLock.lock();
        try {
            // Drain and stop if end-of-stream was requested
            if (isEos()) {
                mMediaCodec.signalEndOfInputStream(); // only valid for Surface input
                output(true);
            }
            mOutputCondition.await();
            output(false);
        } finally {
            mLock.unlock();
        }
    }
    return null;
}

// Wake this thread so it drains the encoder output
public void singalOutput() {
    mLock.lock();
    try {
        mOutputCondition.signal();
    } finally {
        mLock.unlock();
    }
}

init calls prepare, which sets up all the encoder parameters:

protected static final String MIME_TYPE = "video/avc";

public void prepare() {
    mTrackIndex = -1;

    // Look up a suitable encoder for the MIME type
    MediaCodecInfo videoCodecInfo = selectCodec(MIME_TYPE);
    if (videoCodecInfo == null) {
        Log.e(TAG, "Unable to find an appropriate codec for " + MIME_TYPE);
        return;
    }
    MediaFormat format = MediaFormat.createVideoFormat(MIME_TYPE, mWidth, mHeight);
    format.setInteger(MediaFormat.KEY_COLOR_FORMAT, MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface); // API >= 18
    format.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);     // bit rate
    format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE); // frame rate
    // Key-frame interval
    if (!mIsAllKeyFrame) {
        format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, IFRAME_INTERVAL);
    } else {
        format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 0); // every frame is a key frame
    }

    try {
        mMediaCodec = MediaCodec.createEncoderByType(MIME_TYPE);
        mMediaCodec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mSurface = mMediaCodec.createInputSurface(); // API >= 18
        mMediaCodec.start();

        mRenderer.setSurface(mSurface);
        mRenderer.start();
    } catch (Exception e) {
        e.printStackTrace();
    }
}

First we look up a MediaCodecInfo for MIME_TYPE, then create a MediaFormat and set the bit rate and frame rate, create the MediaCodec encoder for MIME_TYPE, obtain its input Surface, and finally call SurfaceEncoderRenderer's start method.
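
The post doesn't give the values of BIT_RATE, FRAME_RATE and IFRAME_INTERVAL; typical choices for this kind of recorder might be (assumptions for illustration):

private static final int FRAME_RATE = 25;      // frames per second
private static final int IFRAME_INTERVAL = 10; // seconds between key frames
// Rule of thumb: roughly 0.25 bits per pixel per frame for H.264
private final int BIT_RATE = (int) (0.25f * FRAME_RATE * mWidth * mHeight);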

The implementation inside SurfaceEncoderRenderer:

public void start() {
    if (!(mSurface instanceof SurfaceView) && !(mSurface instanceof Surface) && !(mSurface instanceof SurfaceHolder) && !(mSurface instanceof SurfaceTexture)) {
        Log.e(TAG, "unsupported surface");
    } else {
        ThreadPool.getInstance().run(this);
    }
}

public Object call() throws Exception {
    while (!mIsInitGL) {
        initGL();
    }
    while (mEncoder.isRecording()) {
        mLock.lock();
        try {
            Log.d(TAG, "await~~~~");
            mDrawCondition.await();
            // makeCurrent ensures the GL calls below target our EGL surface
            mEgl.makeCurrent();
            // Clear with yellow so the rendered rectangle is visible
            GLES20.glClearColor(1.0f, 1.0f, 0.0f, 1.0f);
            GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
            mDrawer.setMatrix(mMatrix, 16);
            mDrawer.draw(mTextureId, mMatrix);
            mEgl.swapBuffers();
            mEncoder.singalOutput(); // tell the encoder thread to drain output
            Log.d(TAG, "draw------------textureId=" + mTextureId);
        } finally {
            mLock.unlock();
        }
    }
    return null;
}

private void initGL() {
    mEgl = new MEgl();
    mEgl.init(mEglContext, false, true, mSurface);
    // The drawer must be created after makeCurrent() so that it renders
    // to the EGL surface created above
    mEgl.makeCurrent();
    mDrawer = new GLDrawer2D();
    mIsInitGL = true;
    Log.d(TAG, "-----init egl opengl -------------");
}

initGL runs on the worker thread; it initializes the EGL/OpenGL state and binds MediaCodec's input Surface. The render loop then draws whenever it is signalled and notifies the VideoSurfaceEncoder to output data.
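
MEgl is also not shown. The essential EGL14 steps it must perform look roughly like this (a sketch; sharedContext stands for the preview thread's EGLContext and surface for MediaCodec's input Surface):

// Create an EGL context that shares textures with the preview context
EGLDisplay display = EGL14.eglGetDisplay(EGL14.EGL_DEFAULT_DISPLAY);
int[] version = new int[2];
EGL14.eglInitialize(display, version, 0, version, 1);

int[] attribs = {
        EGL14.EGL_RED_SIZE, 8, EGL14.EGL_GREEN_SIZE, 8,
        EGL14.EGL_BLUE_SIZE, 8, EGL14.EGL_ALPHA_SIZE, 8,
        EGL14.EGL_RENDERABLE_TYPE, EGL14.EGL_OPENGL_ES2_BIT,
        EGL14.EGL_NONE
};
EGLConfig[] configs = new EGLConfig[1];
int[] numConfigs = new int[1];
EGL14.eglChooseConfig(display, attribs, 0, configs, 0, 1, numConfigs, 0);

int[] ctxAttribs = {EGL14.EGL_CONTEXT_CLIENT_VERSION, 2, EGL14.EGL_NONE};
// Passing sharedContext is what lets this thread sample the camera texture
EGLContext context = EGL14.eglCreateContext(display, configs[0], sharedContext, ctxAttribs, 0);

// Wrap MediaCodec's input Surface in an EGL window surface
EGLSurface eglSurface = EGL14.eglCreateWindowSurface(display, configs[0], surface,
        new int[]{EGL14.EGL_NONE}, 0);
EGL14.eglMakeCurrent(display, eglSurface, eglSurface, context);
// ... draw ...
EGL14.eglSwapBuffers(display, eglSurface); // pushes the frame into MediaCodec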

Audio Recording

Audio recording builds on the same MediaEncoder:

public void start() {
    ThreadPool.getInstance().run(this);
}

@Override
public Object call() throws Exception {
    init();
    return null;
}

public void init() {
    super.init();
    ThreadPool.getInstance().run(mAudioThread);
}

The encoder starts on a worker thread and in turn starts the audio capture thread.

public void prepare() {
    mTrackIndex = -1;
    final MediaCodecInfo audioCodecInfo = selectCodec(MIME_TYPE);
    if (audioCodecInfo == null) {
        Log.e(TAG, "Unable to find an appropriate codec for " + MIME_TYPE);
        return;
    }
    final MediaFormat audioFormat = MediaFormat.createAudioFormat(MIME_TYPE, SAMPLE_RATE, 1);
    audioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, AudioFormat.CHANNEL_IN_MONO);
    audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 1);
    try {
        mMediaCodec = MediaCodec.createEncoderByType(MIME_TYPE);
        mMediaCodec.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mMediaCodec.start();
        mAudioThread = new AudioThread(this);
    } catch (IOException e) {
        e.printStackTrace();
    }
}

The prepare method called from init mirrors the video version: it looks up the MediaCodecInfo, builds the MediaFormat, and creates the AudioThread.
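
The audio constants aren't listed in the post; values like these are typical for AAC recording (assumptions for illustration):

private static final String MIME_TYPE = "audio/mp4a-latm"; // AAC
private static final int SAMPLE_RATE = 44100;      // Hz, supported on all devices
private static final int BIT_RATE = 64000;         // bits per second
private static final int SAMPLES_PER_FRAME = 1024; // bytes per AudioRecord read
private static final int FRAMES_PER_BUFFER = 25;   // frames per AudioRecord buffer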

The core method of AudioThread:

public Object call() throws Exception {
    android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
    try {
        final int min_buffer_size = AudioRecord.getMinBufferSize(
                SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT);

        int buffer_size = SAMPLES_PER_FRAME * FRAMES_PER_BUFFER;

        if (buffer_size < min_buffer_size)
            buffer_size = ((min_buffer_size / SAMPLES_PER_FRAME) + 1) * SAMPLES_PER_FRAME * 2;

        AudioRecord audioRecord = null;
        for (int source : AUDIO_SOURCES) {
            try {
                audioRecord = new AudioRecord(source, SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, buffer_size);
                if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED)
                    audioRecord = null;
            } catch (final Exception e) {
                audioRecord = null;
            }
            if (audioRecord == null) {
                continue;
            }
            try {
                if (mAudioEncoder.isRecording()) {
                    Log.v(TAG, "AudioThread:start audio recording");
                    final ByteBuffer buf = ByteBuffer.allocateDirect(SAMPLES_PER_FRAME);
                    int readBytes;
                    audioRecord.startRecording();
                    try {
                        while (mAudioEncoder.isRecording()) {
                            // Read audio data from the internal mic
                            buf.clear();
                            readBytes = audioRecord.read(buf, SAMPLES_PER_FRAME);
                            if (readBytes > 0) {
                                // Hand the PCM data to the encoder
                                buf.position(readBytes);
                                buf.flip();
                                mAudioEncoder.encodeAudioBuffer(buf, readBytes, false);
                            }
                        }
                    } finally {
                        audioRecord.stop();
                    }
                }
            } finally {
                audioRecord.release();
            }
        }
    } catch (Exception e) {
        // Ignored; the capture thread simply exits
    }
    return null;
}

private static final int[] AUDIO_SOURCES = new int[]{
        MediaRecorder.AudioSource.MIC,
        MediaRecorder.AudioSource.DEFAULT,
        MediaRecorder.AudioSource.CAMCORDER,
        MediaRecorder.AudioSource.VOICE_COMMUNICATION,
        MediaRecorder.AudioSource.VOICE_RECOGNITION,
};

First we compute buffer_size, then iterate over the candidate input sources until an AudioRecord can be created. startRecording begins capture; each chunk read is passed to AudioEncoder.encodeAudioBuffer for encoding.

public void encodeAudioBuffer(ByteBuffer buffer, int length, boolean isEos) {
    if (!isRecording()) {
        return;
    }
    final ByteBuffer[] inputBuffers = mMediaCodec.getInputBuffers();
    while (isRecording()) {
        final int inputBufferIndex = mMediaCodec.dequeueInputBuffer(TIMEOUT_USEC);
        if (inputBufferIndex >= 0) {
            final ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
            inputBuffer.clear();
            if (buffer != null) {
                inputBuffer.put(buffer);
            }
            if (isEos) {
                mMediaCodec.queueInputBuffer(inputBufferIndex, 0, 0, getPTS(), MediaCodec.BUFFER_FLAG_END_OF_STREAM);
            } else {
                mMediaCodec.queueInputBuffer(inputBufferIndex, 0, length, getPTS(), 0);
            }
            output(isEos);
            break;
        } else if (inputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
            // Wait until the MediaCodec encoder is ready to accept input.
            // Nothing to do here because dequeueInputBuffer(TIMEOUT_USEC)
            // already waits up to TIMEOUT_USEC (10 ms) on each call.
        }
    }
}

encodeAudioBuffer feeds the captured data into MediaCodec and then calls output to mux the encoded samples.
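
getPTS() is not shown in the post. MediaMuxer requires monotonically increasing presentation timestamps, so a sketch might look like:

// A sketch of getPTS() (assumption: not shown in the post)
private long prevOutputPTSUs = 0;

protected long getPTS() {
    long result = System.nanoTime() / 1000L; // microseconds
    if (result < prevOutputPTSUs) {
        result = prevOutputPTSUs; // keep timestamps monotonic
    }
    prevOutputPTSUs = result;
    return result;
}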

Starting the Encoders

public void onDrawFrame(GL10 gl) {
    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
    if (mIsNeedUpdateTexture) {
        mIsNeedUpdateTexture = false;
        // Pull the latest camera frame into the texture
        // (the camera is already bound to this SurfaceTexture)
        surfaceTexture.updateTexImage();
        // Fetch the texture transform matrix
        surfaceTexture.getTransformMatrix(mSurfaceTextureMatrix);
    }

    // Is recording requested?
    if (mIsNeedRecord) {
        if (mVideoEncoder == null) {
            MMuxer mMuxer = new MMuxer(getSaveVideoPath());
            mVideoEncoder = new VideoSurfaceEncoder(mMuxer, cameraGLSurfaceView.getVideoWidth(), cameraGLSurfaceView.getVideoHeight());
            mAudioEncoder = new AudioEncoder(mMuxer);
            mVideoEncoder.setAllKeyFrame(false);
            mVideoEncoder.setEglAndStart(EGL14.eglGetCurrentContext(), textureId);
            mAudioEncoder.start();
        }
        if (mVideoEncoder != null && mVideoEncoder.isPrepared() && mIsRecordCurrFrame) {
            mVideoEncoder.render(mSurfaceTextureMatrix, mMvpMatrix);
        }
        // Encode every other preview frame
        mIsRecordCurrFrame = !mIsRecordCurrFrame;
    }
    if (mDrawer != null) {
        mDrawer.draw(textureId, mSurfaceTextureMatrix);
    }
    if (mIsStopRecorder && mVideoEncoder != null && mAudioEncoder != null) {
        mVideoEncoder.eos();
        mAudioEncoder.eos();
        mIsNeedRecord = false;
        mVideoEncoder = null;
    }
}

In SurfaceRenderer's onDrawFrame we create the MMuxer, VideoSurfaceEncoder and AudioEncoder, and call render after each frame update to push the frame to the encoder; render() is sketched below.
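
The render() method is also not shown; presumably it hands the two matrices to the SurfaceEncoderRenderer and wakes its draw loop (a sketch under that assumption, consistent with the mMatrix/mDrawCondition usage in the renderer's call() above):

// In VideoSurfaceEncoder (assumption: not shown in the post)
public void render(float[] surfaceTextureMatrix, float[] mvpMatrix) {
    mRenderer.draw(surfaceTextureMatrix, mvpMatrix);
}

// In SurfaceEncoderRenderer: mMatrix packs the texture matrix at offset 0
// and the MVP matrix at offset 16, matching setMatrix(mMatrix, 16)
private final float[] mMatrix = new float[32];

public void draw(float[] texMatrix, float[] mvpMatrix) {
    mLock.lock();
    try {
        System.arraycopy(texMatrix, 0, mMatrix, 0, 16);
        System.arraycopy(mvpMatrix, 0, mMatrix, 16, 16);
        mDrawCondition.signal(); // wake the loop in call()
    } finally {
        mLock.unlock();
    }
}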